Данные представляют собой аннотированные слова, записанные от 6 носителей в селении Красный Восток:
ls raw_data
## d23_stress.TextGrid
## d23_stress.wav
## d25_stress.TextGrid
## d25_stress.wav
## d26_stress.TextGrid
## d26_stress.wav
## d27_stress.TextGrid
## d27_stress.wav
## d28_stress.TextGrid
## d28_stress.wav
## d30_stress.TextGrid
## d30_stress.wav
От каждого носителя было записано от 74 до 79 стимульных слов:
# NOTE(review): the rest of the document relies on dplyr, tidyr, stringr,
# readr and ggplot2 (%>%, filter, pivot_wider, str_*, ggplot, ...), but only
# phonfieldwork was loaded; load the tidyverse explicitly here.
library(tidyverse)
library(phonfieldwork)

# Read all TextGrid annotation files from the raw_data folder into one tibble.
textgrids <- read_from_folder("raw_data", "textgrid")

# Count distinct stimulus words (tier 1) per source file — one file per speaker.
textgrids %>%
  filter(tier == 1) %>%
  distinct(source, content) %>%
  count(source)
Из анализа были удалены слова, отмеченные аннотатором в ярусе error; ниже приводится их список:
# Build the list of (speaker, word) pairs annotated as errors; these rows are
# later removed from the measurements with anti_join().
textgrids %>%
  filter(tier_name %in% c("labels", "error")) %>%
  select(-tier, -id) %>%
  # one column per tier: "labels" (the word) and "error" (the error mark)
  pivot_wider(names_from = tier_name, values_from = content) %>%
  # fixed() makes the dot literal: the bare pattern ".TextGrid" would treat
  # "." as a regex wildcard and could strip an unintended character
  mutate(speaker = str_remove(source, fixed(".TextGrid")),
         word = labels) %>%
  # keep only rows where the error tier is non-empty
  filter(!is.na(error), error != "") %>%
  select(speaker, word) ->
  to_remove
to_remove
Дальше следует описание данных, см. Appendix
# Oscillogram + spectrogram of one annotated recording (see Appendix for the
# description of the annotation tiers); the zoom window shows 3.8-4.4 s.
draw_sound(file_name = "raw_data/d23_stress.wav",
           annotation = "raw_data/d23_stress.TextGrid",
           zoom = c(3.8, 4.4),
           from = 0,
           to = 10.6)
После этого мы применяем к этим данным вот этот скрипт:
cat get_pitch_intencity.praat
## # This is a Praat script made for investigation of Abaza vowels. It analyses multiple selected sounds
## # (TextGrids should be also uploaded to Praat Obects). The file should have the following structure:
## # * first tier --- word label
## # * second tier --- translation label
## # * third tier --- sound label
## # * fourth tier --- utterance label
##
##
## # This script is distributed under the GNU General Public License.
## # George Moroz 09.05.2022
##
## form Get Pitch listing from a file
## comment Where should the script write a result file
## text directory /home/agricolamz/for_work/HSE/students/2022_Kuznetsova/data/
## comment How should the script name a result file
## text resultfile log.txt
## comment Time step
## real step 0.01
## comment Pitch floor (Hz)
## integer floor 90
## comment Pitch ceiling (Hz)
## integer ceiling 250
## comment 5. formant ceiling (Hz)
## integer fceiling 5500
## comment Minimum pitch for intensity (Hz)
## integer mpitch 200
## endform
##
## n = numberOfSelected("Sound")
## for j to n
## sound[j] = selected("Sound", j)
## endfor
## for k to n
## selectObject: sound[k]
## object_name$ = selected$ ("Sound")
## select TextGrid 'object_name$'
## number_of_intervals = Get number of intervals... 3
## for b from 1 to number_of_intervals
## select TextGrid 'object_name$'
## interval_label$ = Get label of interval... 3 'b'
## utterance$ = Get label of interval... 4 'b'
## if interval_label$ <> ""
## start = Get starting point... 3 'b'
## end = Get end point... 3 'b'
## duration = end - start
## int_1 = Get interval at time... 1 end
## word$ = Get label of interval... 1 int_1
## trans$ = Get label of interval... 2 int_1
## select Sound 'object_name$'
## s = Extract part: start, end, "rectangular", 1, "yes"
## select s
## fragment_name$ = selected$ ("Sound")
## pitch = To Pitch... step floor ceiling
## selectObject: s
## formant = To Formant (burg): 0, 5, fceiling, 0.025, 50
## selectObject: s
## intensity = To Intensity: mpitch, 0, "no"
## i = start
## while i <= end
## select Pitch 'fragment_name$'
## f0 = Get value at time... 'i' Hertz Linear
## select Formant 'fragment_name$'
## f1 = Get value at time: 1, i, "Hertz", "Linear"
## f2 = Get value at time: 2, i, "Hertz", "Linear"
## f3 = Get value at time: 3, i, "Hertz", "Linear"
## select Intensity 'fragment_name$'
## intvalue = Get value at time: 'i', "cubic"
## i = i + 0.01
## fileappend "'directory$''resultfile$'" 'object_name$''tab$''interval_label$''tab$''utterance$''tab$''word$''tab$''trans$''tab$''f0''tab$''f1''tab$''f2''tab$''f3''tab$''intvalue''tab$''duration''tab$''i''newline$'
## endwhile
## removeObject: s
## removeObject: pitch
## removeObject: formant
## removeObject: intensity
## endif
## endfor
## # removeObject: "Sound 'object_name$'"
## # removeObject: "TextGrid 'object_name$'"
## endfor
Скрипт идет по аннотации с шагом в 10 мс (параметр step = 0.01 с) и базовыми настройками,
отраженными в меню (между form и endform),
берет значения длительности, f0, f1, f2, f3 и интенсивности. Получается
вот такая вот таблица:
# Read the table produced by the Praat script. Supplying the column names
# directly to read_tsv() replaces the original two-step
# col_names = FALSE + colnames<- pattern; the first file row is still
# treated as data, exactly as before.
df <- read_tsv("data/log.txt",
               col_names = c("speaker", "vowel", "utterance", "word",
                             "translation", "f0", "f1", "f2", "f3",
                             "intensity", "duration", "step"))
df
Дальше с данными можно делать разное, я просто возьму среднее по всем параметрам и создам переменную minimal_pair, которая будет включать минимальную пару:
# Clean the raw measurements and average each acoustic parameter per vowel:
# * Praat's "--undefined--" values become NA;
# * an uppercase vowel label in the annotation marks a stressed vowel;
# * only [а]-vowels from the first four utterances are kept;
# * words annotated as errors (to_remove) are dropped;
# * f0, f1-f3, intensity and duration are averaged over each vowel.
df %>%
  # mutate_all() is superseded; across(everything(), ...) is the modern form
  mutate(across(everything(),
                function(i) str_replace(i, "--undefined--", NA_character_))) %>%
  mutate(across(f0:duration, as.double),
         # uppercase label = stressed vowel
         stressed = ifelse(vowel == toupper(vowel), "stressed", "unstressed"),
         stressed = factor(stressed, levels = c("unstressed", "stressed")),
         # "V1"/"V2"... → syllable number of the vowel within the word
         vowel_n = str_extract(utterance, "V\\d"),
         vowel_n = as.double(str_remove(vowel_n, "V")),
         # "u1".."u9" → utterance (repetition) number
         utterance = str_extract(utterance, "u\\d"),
         utterance = as.double(str_remove(utterance, "u"))) %>%
  filter(str_detect(vowel, "[аА]"),
         utterance <= 4) %>%
  select(-step) %>%
  # remove words annotated as errors (joined by speaker and word)
  anti_join(to_remove) %>%
  group_by(speaker, utterance, stressed, word, vowel_n) %>%
  summarise(f0 = mean(f0, na.rm = TRUE),
            f1 = mean(f1, na.rm = TRUE),
            f2 = mean(f2, na.rm = TRUE),
            f3 = mean(f3, na.rm = TRUE),
            intensity = mean(intensity, na.rm = TRUE),
            duration = mean(duration, na.rm = TRUE)) %>%
  mutate(vowel_n = str_c(vowel_n, ". syllable"),
         utterance = str_c(utterance, ". utterance"),
         duration = duration*1000) %>%
  ungroup() %>%
  # words that differ only in capitalization form one minimal pair
  mutate(word_pair = as.double(factor(tolower(word)))) %>%
  group_by(word_pair) %>%
  mutate(minimal_pair = str_c(unique(str_c(word, "_")), collapse = ""),
         minimal_pair = str_remove(minimal_pair, "_$")) %>%
  ungroup() ->
  mean_values
mean_values
Вы спрашивали, какие минимальные пары вошли в анализ?
# One row per minimal pair, one column per speaker: which minimal pairs
# entered the analysis for each speaker.
mean_values %>%
  distinct(word_pair, speaker, minimal_pair) %>%
  pivot_wider(names_from = speaker, values_from = minimal_pair) %>%
  select(-word_pair)
График с разницами между минимальными парами (ударный слог минус безударный). Если разницы нет, то пик плотности должен располагаться над 0. Если же он смещен от нуля, значит разница между ударными и безударными слогами есть.
# Density of duration differences within minimal pairs
# (stressed minus unstressed), faceted by speaker and utterance number.
mean_values %>%
  select(speaker, utterance, stressed, minimal_pair, vowel_n, duration) %>%
  pivot_wider(names_from = stressed, values_from = duration) %>%
  # fixes the variable-name typo "differance" → "difference"
  # (also corrects the x-axis label on the plot)
  mutate(duration_difference = stressed - unstressed) %>%
  ggplot(aes(duration_difference, fill = vowel_n)) +
  geom_density(alpha = 0.4) +
  facet_grid(speaker ~ utterance, scales = "free")
# Density of f0 differences within minimal pairs
# (stressed minus unstressed), faceted by speaker and utterance number.
mean_values %>%
  select(speaker, utterance, stressed, minimal_pair, vowel_n, f0) %>%
  pivot_wider(names_from = stressed, values_from = f0) %>%
  # fixes the variable-name typo "differance" → "difference"
  mutate(f0_difference = stressed - unstressed) %>%
  ggplot(aes(f0_difference, fill = vowel_n)) +
  geom_density(alpha = 0.4) +
  facet_grid(speaker ~ utterance, scales = "free")
# Density of intensity differences within minimal pairs
# (stressed minus unstressed), faceted by speaker and utterance number.
mean_values %>%
  select(speaker, utterance, stressed, minimal_pair, vowel_n, intensity) %>%
  pivot_wider(names_from = stressed, values_from = intensity) %>%
  # fixes the variable-name typo "differance" → "difference"
  mutate(intensity_difference = stressed - unstressed) %>%
  ggplot(aes(intensity_difference, fill = vowel_n)) +
  geom_density(alpha = 0.4) +
  facet_grid(speaker ~ utterance, scales = "free")
Я использую байесовскую логистическую регрессию со смешанными эффектами с дефолтными прайерами и формула выглядит вот так:
stressed ~ ПЕРЕМЕННАЯ * vowel_n + (1|speaker) + (vowel_n + 1|minimal_pair/utterance)
Получается взаимодействие переменной и номера гласного в слове, случайный интерсепт по носителю, а также случайный интерсепт и случайный уклон по номеру гласного для номера произнесения, вложенного в минимальную пару.
library(brms)

# Bayesian mixed-effects logistic regression: does vowel duration (together
# with syllable number) predict stress? Random intercept per speaker; random
# intercept and vowel_n slope for utterance nested in minimal pair.
duration_data <- mean_values %>%
  select(speaker, utterance, stressed, minimal_pair, vowel_n, duration)

fit_duration <- brm(
  stressed ~ duration*vowel_n + (1|speaker) + (vowel_n+1|minimal_pair/utterance),
  family = bernoulli(),
  data = duration_data)
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.000709 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 7.09 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 101.365 seconds (Warm-up)
## Chain 1: 38.3713 seconds (Sampling)
## Chain 1: 139.736 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 0.000615 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 6.15 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 94.2461 seconds (Warm-up)
## Chain 2: 36.8635 seconds (Sampling)
## Chain 2: 131.11 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 0.000836 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 8.36 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 99.0993 seconds (Warm-up)
## Chain 3: 36.7829 seconds (Sampling)
## Chain 3: 135.882 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 0.00057 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 5.7 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 97.9656 seconds (Warm-up)
## Chain 4: 37.2195 seconds (Sampling)
## Chain 4: 135.185 seconds (Total)
## Chain 4:
# Marginal effect of duration on the probability of being stressed,
# split by syllable number.
conditional_effects(fit_duration, effects = "duration:vowel_n")
# Same model as fit_duration, with f0 as the acoustic predictor.
f0_data <- mean_values %>%
  select(speaker, utterance, stressed, minimal_pair, vowel_n, f0)

fit_f0 <- brm(
  stressed ~ f0*vowel_n + (1|speaker) + (vowel_n+1|minimal_pair/utterance),
  family = bernoulli(),
  data = f0_data)
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.000781 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 7.81 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 100.545 seconds (Warm-up)
## Chain 1: 38.9948 seconds (Sampling)
## Chain 1: 139.54 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 0.000531 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 5.31 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 94.412 seconds (Warm-up)
## Chain 2: 20.8405 seconds (Sampling)
## Chain 2: 115.252 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 0.000701 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 7.01 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 94.915 seconds (Warm-up)
## Chain 3: 37.2815 seconds (Sampling)
## Chain 3: 132.197 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 0.000515 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 5.15 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 93.8106 seconds (Warm-up)
## Chain 4: 37.1562 seconds (Sampling)
## Chain 4: 130.967 seconds (Total)
## Chain 4:
# Marginal effect of f0 on the probability of being stressed,
# split by syllable number.
conditional_effects(fit_f0, effects = "f0:vowel_n")
# Same model as fit_duration, with intensity as the acoustic predictor.
intensity_data <- mean_values %>%
  select(speaker, utterance, stressed, minimal_pair, vowel_n, intensity)

fit_intensity <- brm(
  stressed ~ intensity*vowel_n + (1|speaker) + (vowel_n+1|minimal_pair/utterance),
  family = bernoulli(),
  data = intensity_data)
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.000549 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 5.49 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 136.772 seconds (Warm-up)
## Chain 1: 131.262 seconds (Sampling)
## Chain 1: 268.034 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 0.000513 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 5.13 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 125.282 seconds (Warm-up)
## Chain 2: 128.484 seconds (Sampling)
## Chain 2: 253.766 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 0.000649 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 6.49 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 134.906 seconds (Warm-up)
## Chain 3: 130.783 seconds (Sampling)
## Chain 3: 265.688 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'e67f1fb651efe88012e69c4f524f5c99' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 0.000521 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 5.21 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 137.674 seconds (Warm-up)
## Chain 4: 152.032 seconds (Sampling)
## Chain 4: 289.706 seconds (Total)
## Chain 4:
# Marginal effect of intensity on the probability of being stressed,
# split by syllable number.
conditional_effects(fit_intensity, effects = "intensity:vowel_n")
# Full word list (tier 1) per speaker; speaker codes like "d23" are
# extracted from the source file names.
textgrids %>%
  filter(tier == 1) %>%
  distinct(content, source) %>%
  rename(word = content, speaker = source) %>%
  mutate(speaker = str_extract(speaker, "d\\d\\d"))
Осталось перевести слова в МФА (IPA), см. https://github.com/agricolamz/abaza_cyrillic_to_trans